import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Load the raw pricing dataset and drop columns that carry no signal:
# 'train_id' is just a row identifier and 'Unnamed: 10' is an empty
# artifact column produced by a trailing comma in the CSV header.
dataset=pd.read_csv('price_prediction_new.csv')
df=dataset.drop(['train_id','Unnamed: 10'], axis=1)
# Notebook-style peek at the first two rows.
df.head(2)
name item_condition_id category_name brand_name number_of_item_sold days_launch price shipping item_description
0 MLB Cincinnati Reds T Shirt Size XL 3 Men/Tops/T-shirts NaN 1214 67 10.0 1 No description yet
1 Razer BlackWidow Chroma Keyboard 3 Electronics/Computers & Tablets/Components & P... Razer 2366 41 52.0 0 This keyboard is in great condition and works ...
def handle_missing(dataset):
    """Fill missing values in the text/categorical columns with 'missing'.

    Parameters
    ----------
    dataset : pd.DataFrame
        Frame containing 'category_name', 'brand_name' and
        'item_description' columns.

    Returns
    -------
    pd.DataFrame
        The same frame (columns replaced in place), returned for
        call-chaining convenience.
    """
    # Assign back instead of Series.fillna(inplace=True) on an attribute
    # access: chained in-place fills are deprecated (pandas 2.x warns,
    # pandas 3.0 disallows) and may silently operate on a copy.
    for col in ('category_name', 'brand_name', 'item_description'):
        dataset[col] = dataset[col].fillna('missing')
    return dataset

# Fill NaNs, then integer-encode the two categorical text columns.
data = handle_missing(df)
#PROCESS CATEGORICAL DATA
# Use a separate encoder per column: re-using one LabelEncoder overwrites
# its fitted classes_ on the second fit_transform, so the category mapping
# could never be inverted later. Encoded values are unchanged because
# fit_transform refits from scratch either way.
le = LabelEncoder()
data.category_name = le.fit_transform(data.category_name)
le_brand = LabelEncoder()
data.brand_name = le_brand.fit_transform(data.brand_name)
data.head(2)
name item_condition_id category_name brand_name number_of_item_sold days_launch price shipping item_description
0 MLB Cincinnati Reds T Shirt Size XL 3 296 552 1214 67 10.0 1 No description yet
1 Razer BlackWidow Chroma Keyboard 3 52 412 2366 41 52.0 0 This keyboard is in great condition and works ...
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
# Standardise the numeric predictors to zero mean / unit variance.
columns=['number_of_item_sold','days_launch']
data[columns] = sc.fit_transform(data[columns])
# The target gets its own scaler so predictions can be mapped back to
# dollars later via sc_price.inverse_transform (see end of notebook).
sc_price=StandardScaler()
data[['price']] = sc_price.fit_transform(data[['price']])
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
# Fit ONE shared tokenizer over both text columns so name and description
# share a single vocabulary / integer-id space.
raw_text = np.hstack([data.item_description.str.lower(), data.name.str.lower()])
tokenizer = Tokenizer()
tokenizer.fit_on_texts(raw_text)
# Convert each text column into integer-id sequences (padded later).
data["seq_item_description"] = tokenizer.texts_to_sequences(data.item_description.str.lower())
data["seq_name"] = tokenizer.texts_to_sequences(data.name.str.lower())
data.head(3)
name item_condition_id category_name brand_name number_of_item_sold days_launch price shipping item_description seq_item_description seq_name
0 MLB Cincinnati Reds T Shirt Size XL 3 296 552 -0.503870 0.726913 -0.400241 1 No description yet [12, 62, 71] [2955, 7310, 4207, 256, 89, 7, 203]
1 Razer BlackWidow Chroma Keyboard 3 52 412 1.510132 -0.388936 0.621458 0 This keyboard is in great condition and works ... [24, 2005, 10, 6, 41, 18, 1, 183, 49, 21, 909,... [3568, 7311, 12164, 2005]
2 AVA-VIV Blouse 1 500 474 0.943694 -1.161447 -0.400241 1 Adorable top with a hint of lace and a key hol... [771, 58, 9, 4, 4230, 11, 198, 1, 4, 735, 1185... [12165, 12166, 490]
# Sequence-length and cardinality statistics used to size the model layers.
max_name_seq_length = data['seq_name'].map(len).max()
max_seq_item_description_length = data['seq_item_description'].map(len).max()
# BUG FIX: the original concatenated data.seq_name TWICE, so MAX_TEXT
# ignored the description vocabulary entirely — description token ids above
# the name-column maximum would fall outside the Embedding's input_dim.
# Take the max token id over BOTH sequence columns (+1 because
# Embedding(input_dim=...) is an exclusive upper bound).
MAX_TEXT = np.max(np.concatenate([np.concatenate(data.seq_name),
                                  np.concatenate(data.seq_item_description)])) + 1
# Cardinalities of the encoded categorical columns (+1 for the same reason).
MAX_CATEGORY = data.category_name.max() + 1
MAX_BRAND = data.brand_name.max() + 1
MAX_CONDITION = data.item_condition_id.max() + 1
print("max name seq length "+str(max_name_seq_length))
print("max item desc seq length "+str(max_seq_item_description_length))
print("max text "+str(MAX_TEXT))
print("max category "+str(MAX_CATEGORY))
print("max brand "+str(MAX_BRAND))
print("max max condition "+str(MAX_CONDITION))
max name seq length 10
max item desc seq length 194
max text 13568
max category 532
max brand 558
max max condition 6
# split data
# Drop the raw text columns (their tokenized forms are kept) and make an
# 80/20 train/test split with a fixed seed for reproducibility.
dataset=data.drop(['name','item_description'], axis=1)
data_train, data_test = train_test_split(dataset, random_state=123, train_size=0.8)
# Import from tensorflow.keras for consistency with the Tokenizer import
# above; the standalone `keras.preprocessing.sequence` path is removed in
# Keras 3.
from tensorflow.keras.preprocessing.sequence import pad_sequences

def get_keras_data(dataset, name_maxlen=10, desc_maxlen=50):
    """Convert a DataFrame into the dict-of-arrays format the model expects.

    Parameters
    ----------
    dataset : pd.DataFrame
        Must contain the tokenized/encoded columns produced above
        ('seq_name', 'seq_item_description', 'brand_name', ...).
    name_maxlen : int, optional
        Pad/truncate length for the name token sequences. Default 10
        matches the observed max name length.
    desc_maxlen : int, optional
        Pad/truncate length for the item-description sequences. Default 50
        (descriptions run up to 194 tokens; longer ones are truncated).

    Returns
    -------
    dict
        Keys match the Input layer names declared in build_model.
    """
    X = {
        # 'post' padding keeps tokens left-aligned and zero-fills the tail.
        'name': pad_sequences(dataset['seq_name'], maxlen=name_maxlen, padding='post'),
        'item_desc': pad_sequences(dataset['seq_item_description'], maxlen=desc_maxlen, padding='post'),
        'brand_name': np.array(dataset['brand_name']),
        'category_name': np.array(dataset['category_name']),
        'item_condition': np.array(dataset['item_condition_id']),
        'shipping': np.array(dataset['shipping']),
        'number_of_item_sold': np.array(dataset['number_of_item_sold']),
        'days_launch': np.array(dataset['days_launch'])
    }
    return X
# Build the model-input dicts and target vectors; 'price' here is the
# standardised price (see sc_price above), not dollars.
X_train = get_keras_data(data_train)
X_test = get_keras_data(data_test)
y_train = data_train['price'].values
y_test = data_test['price'].values
import tensorflow as tf
import keras_tuner as kt
from tensorflow.keras.layers import Input, Embedding, GRU, Dense, Dropout, Flatten, BatchNormalization, concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

def build_model(hp):
    """Build and compile the multi-input price-regression model for Keras Tuner.

    Two GRU branches read the tokenized name and description; four embedded
    categorical inputs and two already-standardised numeric inputs are
    concatenated with them and fed through two dense layers to a single
    linear output (regression on the standardised price).

    Parameters
    ----------
    hp : keras_tuner.HyperParameters
        Search-space handle from the tuner; each hp.* call below both
        declares a hyperparameter and returns the current trial's value.
        (Do not rename these keys — they match recorded trial results.)

    Returns
    -------
    tf.keras.Model
        Compiled with MSE loss, MAE metric and Adam optimizer.
    """
    # Hyperparameters
    learning_rate = hp.Float('learning_rate', min_value=0.0001, max_value=0.01, sampling='log')
    l2_regularizer = hp.Float('l2_regularizer', min_value=0.0001, max_value=0.01, sampling='log')
    kernel_initializer = hp.Choice('kernel_initializer', values=['he_uniform', 'glorot_uniform', 'lecun_normal'])
    activation = hp.Choice('activation', values=['relu', 'tanh'])
    dropout_rate = hp.Float('dropout_rate', min_value=0.2, max_value=0.5, step=0.1)
    GRU_units = hp.Int('GRU_units', min_value=32, max_value=256, step=32)
    dense_units = hp.Int('dense_units', min_value=32, max_value=128, step=32)

    # Inputs — layer names must match the dict keys built by get_keras_data.
    input_name = Input(shape=(10,), name='name')
    input_item_desc = Input(shape=(50,), name='item_desc')
    input_brand = Input(shape=(1,), name='brand_name')
    input_category = Input(shape=(1,), name='category_name')
    input_condition = Input(shape=(1,), name='item_condition')
    input_shipping = Input(shape=(1,), name='shipping')
    input_num_sold = Input(shape=(1,), name='number_of_item_sold')
    input_days_launch = Input(shape=(1,), name='days_launch')

    # Embedding — MAX_TEXT (module-level) bounds the shared token vocabulary.
    embedding_name = Embedding(input_dim=MAX_TEXT, output_dim=50)(input_name)
    embedding_item_desc = Embedding(input_dim=MAX_TEXT, output_dim=50)(input_item_desc)

    # GRU Layers — return_sequences=False keeps only the final hidden state.
    rnn_layer_item_desc = GRU(GRU_units, return_sequences=False, 
                               kernel_initializer=kernel_initializer,
                               kernel_regularizer=l2(l2_regularizer))(embedding_item_desc)
    rnn_layer_item_desc = Dropout(dropout_rate)(rnn_layer_item_desc)

    rnn_layer_name = GRU(GRU_units, return_sequences=False, 
                          kernel_initializer=kernel_initializer,
                          kernel_regularizer=l2(l2_regularizer))(embedding_name)
    rnn_layer_name = Dropout(dropout_rate)(rnn_layer_name)

    # Categorical Embedding layers
    # NOTE(review): input_dim=1000 is a hard-coded upper bound; it exceeds
    # the observed MAX_BRAND (558) and MAX_CATEGORY (532), so lookups are
    # safe but some embedding rows are never trained.
    embedding_brand = Embedding(input_dim=1000, output_dim=10)(input_brand)
    embedding_category = Embedding(input_dim=1000, output_dim=10)(input_category)
    embedding_condition = Embedding(input_dim=10, output_dim=5)(input_condition)
    embedding_shipping = Embedding(input_dim=10, output_dim=5)(input_shipping)   
    flatten_brand = Flatten()(embedding_brand)
    flatten_category = Flatten()(embedding_category)
    flatten_condition = Flatten()(embedding_condition)
    flatten_shipping = Flatten()(embedding_shipping)

    # Concatenate — numeric inputs join the fused feature vector directly.
    concat = concatenate([rnn_layer_item_desc, rnn_layer_name, flatten_brand, flatten_category, 
                          flatten_condition, flatten_shipping, input_num_sold, input_days_launch])
   
    dense1 = Dense(dense_units, activation=activation, kernel_initializer=kernel_initializer, 
                   kernel_regularizer=l2(l2_regularizer))(concat)
    dense1 = Dropout(dropout_rate)(dense1)
    
    dense2 = Dense(dense_units, activation=activation, kernel_initializer=kernel_initializer, 
                   kernel_regularizer=l2(l2_regularizer))(dense1)
    dense2 = Dropout(dropout_rate)(dense2)
    
    # Linear single-unit output: regression on the standardised price.
    output = Dense(1)(dense2)  
   
    model = Model(inputs=[input_name, input_item_desc, input_brand, input_category, 
                          input_condition, input_shipping, input_num_sold, input_days_launch],
                  outputs=output)
    optimizer = Adam(learning_rate=learning_rate)
    model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=['mae'])
    
    return model
    
# Create the tuner
tuner = kt.RandomSearch(
    build_model,  # Model-building function
    objective='val_loss',  # Objective to optimize
    max_trials=10,  # Number of trials to try
    directory='experiment',
    )
# NOTE(review): the held-out test split doubles as the tuning validation
# set, so the best val_loss is an optimistic estimate — confirm intended.
tuner.search(X_train,y_train , epochs=30, validation_data=(X_test,y_test))
Trial 10 Complete [00h 07m 49s]
val_loss: 0.6570454239845276

Best val_loss So Far: 0.4438929259777069
Total elapsed time: 01h 29m 34s
# Retrieve (and display) the best hyperparameter combination found.
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
best_hps.values
{'learning_rate': 0.0026052762465815374,
 'l2_regularizer': 0.0001584976160636753,
 'kernel_initializer': 'lecun_normal',
 'activation': 'tanh',
 'dropout_rate': 0.2,
 'GRU_units': 224,
 'dense_units': 128}
# Reload the best trial's model (with its trained weights) and re-check
# its metrics on the held-out set.
best_model = tuner.get_best_models(num_models=1)[0]
val_loss, val_mae = best_model.evaluate(X_test, y_test)
print("\nBest Model Validation mae:", val_mae)
print("\nBest Model Validation Loss:", val_loss) 
36/36 ━━━━━━━━━━━━━━━━━━━━ 6s 56ms/step - loss: 0.3902 - mae: 0.3918

Best Model Validation mae: 0.39053598046302795

Best Model Validation Loss: 0.4438929259777069
# Continue training the tuned model for 100 more epochs.
# NOTE(review): this resumes from the already-trained weights (no
# re-initialisation) and again validates on the test split — confirm intended.
history_best = best_model.fit( X_train, y_train, epochs=100, validation_data=(X_test, y_test))
Epoch 1/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 40s 134ms/step - loss: 0.5528 - mae: 0.3164 - val_loss: 0.5039 - val_mae: 0.4328
Epoch 2/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 129ms/step - loss: 0.2290 - mae: 0.2514 - val_loss: 0.7382 - val_mae: 0.5521
Epoch 3/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 21s 131ms/step - loss: 0.5064 - mae: 0.3301 - val_loss: 0.5642 - val_mae: 0.4719
Epoch 4/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 19s 130ms/step - loss: 0.2110 - mae: 0.2511 - val_loss: 0.6809 - val_mae: 0.5430
Epoch 5/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 128ms/step - loss: 0.5980 - mae: 0.3332 - val_loss: 0.4831 - val_mae: 0.4238
Epoch 6/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 21s 130ms/step - loss: 0.2067 - mae: 0.2412 - val_loss: 0.5218 - val_mae: 0.4441
Epoch 7/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 19s 133ms/step - loss: 0.8577 - mae: 0.3487 - val_loss: 0.4777 - val_mae: 0.4111
Epoch 8/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 21s 135ms/step - loss: 0.2023 - mae: 0.2150 - val_loss: 0.5240 - val_mae: 0.4845
Epoch 9/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 128ms/step - loss: 0.4624 - mae: 0.2953 - val_loss: 0.4598 - val_mae: 0.3908
Epoch 10/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 125ms/step - loss: 0.1968 - mae: 0.2263 - val_loss: 0.4684 - val_mae: 0.3674
Epoch 11/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 17s 121ms/step - loss: 0.2729 - mae: 0.2797 - val_loss: 0.6929 - val_mae: 0.5179
Epoch 12/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.5451 - mae: 0.3020 - val_loss: 0.4800 - val_mae: 0.3810
Epoch 13/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 19s 132ms/step - loss: 0.1957 - mae: 0.2114 - val_loss: 0.6807 - val_mae: 0.4601
Epoch 14/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 127ms/step - loss: 0.1749 - mae: 0.2307 - val_loss: 0.6297 - val_mae: 0.4624
Epoch 15/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 126ms/step - loss: 0.1996 - mae: 0.2391 - val_loss: 0.4502 - val_mae: 0.3634
Epoch 16/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 126ms/step - loss: 0.1696 - mae: 0.2292 - val_loss: 0.4895 - val_mae: 0.4012
Epoch 17/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 21s 126ms/step - loss: 0.2056 - mae: 0.2552 - val_loss: 0.4290 - val_mae: 0.3552
Epoch 18/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 128ms/step - loss: 0.2719 - mae: 0.2744 - val_loss: 0.6431 - val_mae: 0.5373
Epoch 19/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.4936 - mae: 0.3367 - val_loss: 0.4568 - val_mae: 0.3606
Epoch 20/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.1739 - mae: 0.2125 - val_loss: 0.6498 - val_mae: 0.5544
Epoch 21/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 1.2021 - mae: 0.5456 - val_loss: 0.7228 - val_mae: 0.4826
Epoch 22/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 122ms/step - loss: 0.8340 - mae: 0.3607 - val_loss: 0.6372 - val_mae: 0.4085
Epoch 23/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.3686 - mae: 0.2516 - val_loss: 0.5772 - val_mae: 0.3984
Epoch 24/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.2251 - mae: 0.2191 - val_loss: 0.4851 - val_mae: 0.3767
Epoch 25/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 128ms/step - loss: 0.3489 - mae: 0.2499 - val_loss: 0.4669 - val_mae: 0.3575
Epoch 26/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 125ms/step - loss: 0.2220 - mae: 0.1912 - val_loss: 0.5066 - val_mae: 0.4193
Epoch 27/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 21s 127ms/step - loss: 0.2633 - mae: 0.2010 - val_loss: 0.4960 - val_mae: 0.3965
Epoch 28/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 126ms/step - loss: 0.3584 - mae: 0.2190 - val_loss: 0.4906 - val_mae: 0.3702
Epoch 29/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 21s 128ms/step - loss: 0.1801 - mae: 0.1833 - val_loss: 0.5774 - val_mae: 0.4246
Epoch 30/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 22s 137ms/step - loss: 0.2684 - mae: 0.2113 - val_loss: 0.4447 - val_mae: 0.3701
Epoch 31/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 127ms/step - loss: 0.1367 - mae: 0.1824 - val_loss: 0.6582 - val_mae: 0.4914
Epoch 32/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 19s 136ms/step - loss: 0.2776 - mae: 0.2371 - val_loss: 0.5800 - val_mae: 0.4218
Epoch 33/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 128ms/step - loss: 0.1902 - mae: 0.2090 - val_loss: 0.4432 - val_mae: 0.3679
Epoch 34/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 126ms/step - loss: 0.1823 - mae: 0.1930 - val_loss: 0.5270 - val_mae: 0.4467
Epoch 35/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 17s 122ms/step - loss: 0.1922 - mae: 0.2075 - val_loss: 0.4427 - val_mae: 0.3788
Epoch 36/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 122ms/step - loss: 0.2385 - mae: 0.2290 - val_loss: 0.5512 - val_mae: 0.4437
Epoch 37/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 122ms/step - loss: 0.3894 - mae: 0.2505 - val_loss: 0.5290 - val_mae: 0.3926
Epoch 38/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 19s 135ms/step - loss: 0.4047 - mae: 0.2393 - val_loss: 0.4970 - val_mae: 0.3987
Epoch 39/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 133ms/step - loss: 0.1942 - mae: 0.1956 - val_loss: 0.4560 - val_mae: 0.3769
Epoch 40/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 128ms/step - loss: 0.2100 - mae: 0.1906 - val_loss: 0.4721 - val_mae: 0.3920
Epoch 41/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 128ms/step - loss: 0.1286 - mae: 0.1708 - val_loss: 0.5286 - val_mae: 0.4305
Epoch 42/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 127ms/step - loss: 0.2424 - mae: 0.1892 - val_loss: 0.5282 - val_mae: 0.4064
Epoch 43/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 125ms/step - loss: 0.2240 - mae: 0.1991 - val_loss: 0.5964 - val_mae: 0.4590
Epoch 44/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 127ms/step - loss: 0.2374 - mae: 0.2156 - val_loss: 0.4979 - val_mae: 0.4045
Epoch 45/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 128ms/step - loss: 0.1560 - mae: 0.1875 - val_loss: 0.4745 - val_mae: 0.4118
Epoch 46/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 126ms/step - loss: 0.4160 - mae: 0.2838 - val_loss: 0.4570 - val_mae: 0.3893
Epoch 47/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 21s 132ms/step - loss: 0.1395 - mae: 0.1832 - val_loss: 0.5680 - val_mae: 0.4548
Epoch 48/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 125ms/step - loss: 0.1595 - mae: 0.2216 - val_loss: 0.5104 - val_mae: 0.4143
Epoch 49/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 22s 131ms/step - loss: 0.1900 - mae: 0.2500 - val_loss: 0.5117 - val_mae: 0.4396
Epoch 50/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.2701 - mae: 0.2611 - val_loss: 0.5528 - val_mae: 0.3978
Epoch 51/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 19s 131ms/step - loss: 0.1945 - mae: 0.1949 - val_loss: 0.5018 - val_mae: 0.4012
Epoch 52/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 126ms/step - loss: 0.2982 - mae: 0.2208 - val_loss: 0.5307 - val_mae: 0.4242
Epoch 53/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 125ms/step - loss: 0.5615 - mae: 0.3188 - val_loss: 0.5108 - val_mae: 0.3927
Epoch 54/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 19s 130ms/step - loss: 0.2426 - mae: 0.2308 - val_loss: 0.5118 - val_mae: 0.4056
Epoch 55/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 123ms/step - loss: 0.3412 - mae: 0.2589 - val_loss: 0.5578 - val_mae: 0.4266
Epoch 56/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 21s 125ms/step - loss: 0.5717 - mae: 0.2776 - val_loss: 0.4742 - val_mae: 0.3672
Epoch 57/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 122ms/step - loss: 0.2336 - mae: 0.1917 - val_loss: 0.5605 - val_mae: 0.4023
Epoch 58/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 21s 123ms/step - loss: 0.2262 - mae: 0.2013 - val_loss: 0.4668 - val_mae: 0.3979
Epoch 59/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 117ms/step - loss: 0.4260 - mae: 0.2310 - val_loss: 0.4621 - val_mae: 0.3832
Epoch 60/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 17s 118ms/step - loss: 0.2653 - mae: 0.1948 - val_loss: 0.5304 - val_mae: 0.4329
Epoch 61/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 23s 131ms/step - loss: 0.2283 - mae: 0.2338 - val_loss: 0.4462 - val_mae: 0.3655
Epoch 62/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 17s 121ms/step - loss: 0.1916 - mae: 0.2000 - val_loss: 0.4646 - val_mae: 0.3958
Epoch 63/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 19s 129ms/step - loss: 0.2258 - mae: 0.2202 - val_loss: 0.4610 - val_mae: 0.3855
Epoch 64/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 124ms/step - loss: 0.3751 - mae: 0.2462 - val_loss: 0.4957 - val_mae: 0.4045
Epoch 65/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 124ms/step - loss: 0.1594 - mae: 0.1952 - val_loss: 0.5814 - val_mae: 0.5157
Epoch 66/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 129ms/step - loss: 0.3138 - mae: 0.2694 - val_loss: 0.4497 - val_mae: 0.3902
Epoch 67/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 125ms/step - loss: 0.1957 - mae: 0.1970 - val_loss: 0.5568 - val_mae: 0.4638
Epoch 68/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.1900 - mae: 0.1900 - val_loss: 0.5248 - val_mae: 0.4504
Epoch 69/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 19s 130ms/step - loss: 0.4892 - mae: 0.2483 - val_loss: 0.5931 - val_mae: 0.4532
Epoch 70/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 122ms/step - loss: 0.1385 - mae: 0.1874 - val_loss: 0.5337 - val_mae: 0.4312
Epoch 71/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 17s 122ms/step - loss: 0.2422 - mae: 0.2270 - val_loss: 0.5043 - val_mae: 0.4278
Epoch 72/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 19s 129ms/step - loss: 0.1975 - mae: 0.1900 - val_loss: 0.4836 - val_mae: 0.4082
Epoch 73/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 17s 121ms/step - loss: 0.2042 - mae: 0.2012 - val_loss: 0.4329 - val_mae: 0.3786
Epoch 74/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 128ms/step - loss: 0.2347 - mae: 0.1917 - val_loss: 0.4260 - val_mae: 0.3782
Epoch 75/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 122ms/step - loss: 0.1515 - mae: 0.1753 - val_loss: 0.4432 - val_mae: 0.3948
Epoch 76/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.1264 - mae: 0.1728 - val_loss: 0.4708 - val_mae: 0.3989
Epoch 77/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 22s 130ms/step - loss: 0.3347 - mae: 0.2693 - val_loss: 0.4503 - val_mae: 0.3676
Epoch 78/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 122ms/step - loss: 0.2117 - mae: 0.2136 - val_loss: 0.5579 - val_mae: 0.4419
Epoch 79/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 122ms/step - loss: 0.2422 - mae: 0.2370 - val_loss: 0.4942 - val_mae: 0.4259
Epoch 80/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 19s 129ms/step - loss: 0.2877 - mae: 0.2047 - val_loss: 0.4335 - val_mae: 0.3946
Epoch 81/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 20s 122ms/step - loss: 0.2485 - mae: 0.1873 - val_loss: 0.4287 - val_mae: 0.3725
Epoch 82/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.1904 - mae: 0.1796 - val_loss: 0.5111 - val_mae: 0.4465
Epoch 83/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 122ms/step - loss: 0.1428 - mae: 0.1864 - val_loss: 0.4410 - val_mae: 0.3734
Epoch 84/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 17s 122ms/step - loss: 0.2221 - mae: 0.2062 - val_loss: 0.4795 - val_mae: 0.4129
Epoch 85/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 124ms/step - loss: 0.5025 - mae: 0.2170 - val_loss: 0.4594 - val_mae: 0.3910
Epoch 86/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.2928 - mae: 0.2517 - val_loss: 0.4624 - val_mae: 0.4038
Epoch 87/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 17s 122ms/step - loss: 0.1758 - mae: 0.1862 - val_loss: 0.4776 - val_mae: 0.4144
Epoch 88/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 17s 122ms/step - loss: 0.1419 - mae: 0.1796 - val_loss: 0.6175 - val_mae: 0.5185
Epoch 89/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.3441 - mae: 0.2631 - val_loss: 0.5183 - val_mae: 0.4627
Epoch 90/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 19s 129ms/step - loss: 0.2347 - mae: 0.2258 - val_loss: 0.4972 - val_mae: 0.4238
Epoch 91/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.2071 - mae: 0.2023 - val_loss: 0.5210 - val_mae: 0.4377
Epoch 92/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 122ms/step - loss: 0.2393 - mae: 0.2197 - val_loss: 0.4733 - val_mae: 0.4084
Epoch 93/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.1571 - mae: 0.1822 - val_loss: 0.4541 - val_mae: 0.3853
Epoch 94/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.1317 - mae: 0.1746 - val_loss: 0.4569 - val_mae: 0.4166
Epoch 95/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 21s 123ms/step - loss: 0.1549 - mae: 0.1964 - val_loss: 0.4831 - val_mae: 0.4180
Epoch 96/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 122ms/step - loss: 0.1326 - mae: 0.1872 - val_loss: 0.4812 - val_mae: 0.4277
Epoch 97/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 17s 122ms/step - loss: 0.1459 - mae: 0.2083 - val_loss: 0.4458 - val_mae: 0.4073
Epoch 98/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 122ms/step - loss: 0.1135 - mae: 0.1732 - val_loss: 0.4803 - val_mae: 0.4355
Epoch 99/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.3248 - mae: 0.2487 - val_loss: 0.5115 - val_mae: 0.4290
Epoch 100/100
143/143 ━━━━━━━━━━━━━━━━━━━━ 18s 123ms/step - loss: 0.6854 - mae: 0.3183 - val_loss: 0.6810 - val_mae: 0.4840
# Graph plot
import matplotlib.pyplot as plt
# Plot the loss and MAE curves recorded during the final 100-epoch fit
# (model.fit stores its History on best_model.history).
plt.plot(best_model.history.history['loss'], label='Training loss')
plt.plot(best_model.history.history['val_loss'], label='Validation loss')
plt.plot(best_model.history.history['mae'], label='Training mae')
plt.plot(best_model.history.history['val_mae'], label='Validation mae')
plt.xlabel('Epochs')
# Fixed axis label: these curves are loss/error values, not accuracy.
plt.ylabel('Loss / MAE')
plt.legend()
plt.savefig('abalone-b.png')  # Save the graph
plt.show()

# Predict standardised prices for the held-out set.
val_preds = best_model.predict(X_test)
36/36 ━━━━━━━━━━━━━━━━━━━━ 5s 94ms/step
# Map the standardised predictions back to the original dollar scale
# using the target-only scaler fitted earlier.
val_preds1 = sc_price.inverse_transform(val_preds)
print(val_preds1)
[[25.536491]
 [13.221608]
 [23.90584 ]
 ...
 [ 7.229006]
 [23.007328]
 [27.940666]]